[IA64] separate mapped_regs and vpd, handle various page size
authorawilliam@xenbuild.aw <awilliam@xenbuild.aw>
Fri, 7 Jul 2006 16:34:45 +0000 (10:34 -0600)
committerawilliam@xenbuild.aw <awilliam@xenbuild.aw>
Fri, 7 Jul 2006 16:34:45 +0000 (10:34 -0600)
Allow Xen page size != 16KB.

Decorrelate XSI size and page_size.
Separate mapped_regs and vpd to save memory in non-VTi mode.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
xen/arch/ia64/vmx/vmx_init.c
xen/arch/ia64/xen/domain.c
xen/arch/ia64/xen/ivt.S
xen/arch/ia64/xen/privop.c
xen/arch/ia64/xen/xenasm.S
xen/arch/ia64/xen/xensetup.c
xen/include/asm-ia64/xenpage.h
xen/include/public/arch-ia64.h

index 31bab459c930865a7ec77ea16dde8dc40e150131..074c536f56c851b506daea782a3b592d2660bd95 100644 (file)
@@ -156,6 +156,7 @@ static vpd_t *alloc_vpd(void)
        int i;
        cpuid3_t cpuid3;
        vpd_t *vpd;
+       mapped_regs_t *mregs;
 
        vpd = alloc_xenheap_pages(get_order(VPD_SIZE));
        if (!vpd) {
@@ -165,23 +166,25 @@ static vpd_t *alloc_vpd(void)
 
        printk("vpd base: 0x%p, vpd size:%ld\n", vpd, sizeof(vpd_t));
        memset(vpd, 0, VPD_SIZE);
+       mregs = &vpd->vpd_low;
+
        /* CPUID init */
        for (i = 0; i < 5; i++)
-               vpd->vcpuid[i] = ia64_get_cpuid(i);
+               mregs->vcpuid[i] = ia64_get_cpuid(i);
 
        /* Limit the CPUID number to 5 */
-       cpuid3.value = vpd->vcpuid[3];
+       cpuid3.value = mregs->vcpuid[3];
        cpuid3.number = 4;      /* 5 - 1 */
-       vpd->vcpuid[3] = cpuid3.value;
+       mregs->vcpuid[3] = cpuid3.value;
 
-    vpd->vac.a_from_int_cr = 1;
-    vpd->vac.a_to_int_cr = 1;
-    vpd->vac.a_from_psr = 1;
-    vpd->vac.a_from_cpuid = 1;
-    vpd->vac.a_cover = 1;
-    vpd->vac.a_bsw = 1;
+       mregs->vac.a_from_int_cr = 1;
+       mregs->vac.a_to_int_cr = 1;
+       mregs->vac.a_from_psr = 1;
+       mregs->vac.a_from_cpuid = 1;
+       mregs->vac.a_cover = 1;
+       mregs->vac.a_bsw = 1;
 
-       vpd->vdc.d_vmsw = 1;
+       mregs->vdc.d_vmsw = 1;
 
        return vpd;
 }
@@ -201,7 +204,7 @@ static void
 vmx_create_vp(struct vcpu *v)
 {
        u64 ret;
-       vpd_t *vpd = v->arch.privregs;
+       vpd_t *vpd = (vpd_t *)v->arch.privregs;
        u64 ivt_base;
     extern char vmx_ia64_ivt;
        /* ia64_ivt is function pointer, so need this tranlation */
@@ -274,8 +277,8 @@ vmx_final_setup_guest(struct vcpu *v)
        vpd = alloc_vpd();
        ASSERT(vpd);
 
-       v->arch.privregs = vpd;
-       vpd->virt_env_vaddr = vm_buffer;
+       v->arch.privregs = (mapped_regs_t *)vpd;
+       vpd->vpd_low.virt_env_vaddr = vm_buffer;
 
        /* Per-domain vTLB and vhpt implementation. Now vmx domain will stick
         * to this solution. Maybe it can be deferred until we know created
index aa99eb86da4bd6205e9b2de7aeee8f6c877bc1d3..9fe5dec74df0df39d4d2d5ae13edfa23bcf21db6 100644 (file)
@@ -313,7 +313,8 @@ void free_vcpu_struct(struct vcpu *v)
                vmx_relinquish_vcpu_resources(v);
        else {
                if (v->arch.privregs != NULL)
-                       free_xenheap_pages(v->arch.privregs, get_order(sizeof(mapped_regs_t)));
+                       free_xenheap_pages(v->arch.privregs,
+                                     get_order_from_shift(XMAPPEDREGS_SHIFT));
        }
 
        free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
@@ -350,11 +351,13 @@ int arch_domain_create(struct domain *d)
        if (is_idle_domain(d))
            return 0;
 
-       if ((d->shared_info = (void *)alloc_xenheap_page()) == NULL)
+       d->shared_info = alloc_xenheap_pages(get_order_from_shift(XSI_SHIFT));
+       if (d->shared_info == NULL)
            goto fail_nomem;
-       memset(d->shared_info, 0, PAGE_SIZE);
-       share_xen_page_with_guest(virt_to_page(d->shared_info),
-                                 d, XENSHARE_writable);
+       memset(d->shared_info, 0, XSI_SIZE);
+       for (i = 0; i < XSI_SIZE; i += PAGE_SIZE)
+           share_xen_page_with_guest(virt_to_page((char *)d->shared_info + i),
+                                     d, XENSHARE_writable);
 
        d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
        /* We may also need emulation rid for region4, though it's unlikely
@@ -376,7 +379,7 @@ fail_nomem:
        if (d->arch.mm.pgd != NULL)
            pgd_free(d->arch.mm.pgd);
        if (d->shared_info != NULL)
-           free_xenheap_page(d->shared_info);
+           free_xenheap_pages(d->shared_info, get_order_from_shift(XSI_SHIFT));
        return -ENOMEM;
 }
 
@@ -384,7 +387,7 @@ void arch_domain_destroy(struct domain *d)
 {
        BUG_ON(d->arch.mm.pgd != NULL);
        if (d->shared_info != NULL)
-               free_xenheap_page(d->shared_info);
+           free_xenheap_pages(d->shared_info, get_order_from_shift(XSI_SHIFT));
 
        domain_flush_destroy (d);
 
index a9773fee675cf1c968d540cd298aa2767e65883f..f090ee691318bfed3a22ce709c3ceae85ae94a42 100644 (file)
@@ -591,7 +591,7 @@ GLOBAL_ENTRY(frametable_miss)
        shladd r24=r19,3,r24    // r24=&pte[pte_offset(addr)]
        ;;
 (p7)   ld8 r24=[r24]           // r24=pte[pte_offset(addr)]
-       mov r25=0x700|(_PAGE_SIZE_16K<<2) // key=7
+       mov r25=0x700|(PAGE_SHIFT<<2) // key=7
 (p6)   br.spnt.few frametable_fault
        ;;
        mov cr.itir=r25
index 44eb167308071350e4a3e9bc92784a7618272ed1..3292f9f3715179ee6b05b22559e481795643a96c 100644 (file)
@@ -700,10 +700,9 @@ ia64_hyperprivop(unsigned long iim, REGS *regs)
        UINT64 val;
        UINT64 itir, ifa;
 
-// FIXME: Handle faults appropriately for these
        if (!iim || iim > HYPERPRIVOP_MAX) {
-               panic_domain(regs, "bad hyperprivop ignored; iim=%lx, "
-                            "iip=0x%lx\n", iim, regs->cr_iip);
+               panic_domain(regs, "bad hyperprivop: iim=%lx, iip=0x%lx\n",
+                            iim, regs->cr_iip);
                return 1;
        }
        slow_hyperpriv_cnt[iim]++;
index 7400c882dba65e62ede238f466c725e378a760a7..74775321522188f26a974bc69e18d70a34b3bec8 100644 (file)
@@ -131,7 +131,7 @@ GLOBAL_ENTRY(ia64_new_rr7)
 #endif
 
        //  Shared info
-       mov r24=PAGE_SHIFT<<2
+       mov r24=XSI_SHIFT<<2
        movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
        ;;
        ptr.d   in3,r24
@@ -144,7 +144,7 @@ GLOBAL_ENTRY(ia64_new_rr7)
        
        // Map mapped_regs
        mov r22=XMAPPEDREGS_OFS
-       mov r24=PAGE_SHIFT<<2
+       mov r24=XMAPPEDREGS_SHIFT<<2
        ;; 
        add r22=r22,in3
        ;;
index d80d4f03e40f875e4c4fd321cda371fe2dbfd5de..e25cae1559f30116a6d3b5c3ccf193a4220b3723 100644 (file)
 #include <linux/efi.h>
 #include <asm/iosapic.h>
 
-/* Be sure the struct shared_info fits on a page because it is mapped in
-   domain. */
-#if SHARED_INFO_SIZE > PAGE_SIZE
- #error "struct shared_info does not not fit in PAGE_SIZE"
+/* Be sure the struct shared_info size is <= XSI_SIZE.  */
+#if SHARED_INFO_SIZE > XSI_SIZE
+#error "struct shared_info bigger than XSI_SIZE"
 #endif
 
 unsigned long xenheap_phys_end, total_pages;
index aef0dc1a03aecf538a9e570a9905df5c8a0b328d..f5b0f74e70ca8be3f444c8c5f14911e8ee8b8024 100644 (file)
@@ -60,6 +60,13 @@ static inline int get_order_from_pages(unsigned long nr_pages)
     return order;
 }
 
+static inline int get_order_from_shift(unsigned long shift)
+{
+    if (shift <= PAGE_SHIFT)
+       return 0;
+    else
+       return shift - PAGE_SHIFT;
+}
 #endif
 
 #undef __pa
index b74ca5748fb1f69e7c92a9caef9ac03f743b4f60..3471647325d49187dcb9c3e5c7d35f94cb3ebd40 100644 (file)
@@ -276,12 +276,16 @@ struct mapped_regs {
             unsigned long tmp[8]; // temp registers (e.g. for hyperprivops)
         };
     };
+};
+typedef struct mapped_regs mapped_regs_t;
+
+struct vpd {
+    struct mapped_regs vpd_low;
     unsigned long  reserved6[3456];
     unsigned long  vmm_avail[128];
     unsigned long  reserved7[4096];
 };
-typedef struct mapped_regs mapped_regs_t;
-typedef mapped_regs_t vpd_t;
+typedef struct vpd vpd_t;
 
 struct arch_vcpu_info {
 };
@@ -365,13 +369,13 @@ struct xen_ia64_boot_param {
 
 /* Address of shared_info in domain virtual space.
    This is the default address, for compatibility only.  */
-#define XSI_BASE                               0xf100000000000000
+#define XSI_BASE                       0xf100000000000000
 
 /* Size of the shared_info area (this is not related to page size).  */
-#define XSI_LOG_SIZE                   14
-#define XSI_SIZE                               (1 << XSI_LOG_SIZE)
+#define XSI_SHIFT                      14
+#define XSI_SIZE                       (1 << XSI_SHIFT)
 /* Log size of mapped_regs area (64 KB - only 4KB is used).  */
-#define XMAPPEDREGS_LOG_SIZE   16
+#define XMAPPEDREGS_SHIFT              12
 /* Offset of XASI (Xen arch shared info) wrt XSI_BASE.  */
 #define XMAPPEDREGS_OFS                        XSI_SIZE